DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
-#ifdef CONFIG_VTI
- DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
- DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
- DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
- DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
- DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct pt_regs, cr_iipa));
- DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
- DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
- DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
-#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
+ DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
+ DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
+ DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
+ DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
+ DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
+ DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
+ DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
+ DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
+ DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
+ DEFINE(IA64_VCPU_OPCODE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.opcode));
+	DEFINE(SWITCH_MPTA_OFFSET, offsetof(struct vcpu, arch.arch_vmx.mpta));
+	DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
+	DEFINE(IA64_VCPU_FLAGS_OFFSET, offsetof(struct vcpu, arch.arch_vmx.flags));
BLANK();
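
The DEFINE() lines above follow the usual asm-offsets idiom: the file is
compiled to assembly and the emitted markers are scraped into a generated
header, so hand-written assembly (VMX_SAVE_REST, the vcpu_bsw* paths, and so
on below) can name structure offsets symbolically. A minimal sketch of the
mechanism, assuming the conventional DEFINE macro (names here are
illustrative, not necessarily this tree's):

    #include <stddef.h>

    /* compiled with -S; a sed pass turns the "->SYM value" markers in the
     * assembly output into "#define SYM value" lines in asm-offsets.h */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct pt_regs_sketch { unsigned long r14, r16; };

    void foo(void)
    {
            DEFINE(SKETCH_PT_REGS_R16_OFFSET,
                   offsetof(struct pt_regs_sketch, r16));
    }

IA64_PT_REGS_R16_SLOT is the one derived constant: st8.spill records a
register's NaT bit in ar.unat at bit addr{8:3}, and because the pt_regs frame
ends on an aligned boundary (see the vcpu_regs() change further down),
(offsetof(r16) - sizeof(struct pt_regs)) >> 3, masked to 6 bits, is exactly
the ar.unat slot where r16's NaT lands.
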
// new domains are cloned but not exec'ed so switch to user mode here
cmp.ne pKStk,pUStk=r0,r0
#ifdef CONFIG_VTI
- br.cond.spnt ia64_leave_hypervisor
+ br.cond.spnt ia64_leave_hypervisor
#else // CONFIG_VTI
- br.cond.spnt ia64_leave_kernel
+ br.cond.spnt ia64_leave_kernel
#endif // CONFIG_VTI
+
+// adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
+// ;;
+// ld8 r16 = [r16]
+// ;;
+// cmp.ne p6,p7 = r16, r0
+// (p6) br.cond.spnt ia64_leave_hypervisor
+// (p7) br.cond.spnt ia64_leave_kernel
+// ;;
#else
.ret8:
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
#ifdef XEN
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
+ adds r7 = PT(EML_UNAT)+16,r12
;;
+ ld8 r7 = [r7]
(p6) br.call.sptk.many b0=deliver_pending_interrupt
+ ;;
mov ar.pfs=loc0
+ mov ar.unat=r7 /* load eml_unat */
mov r31=r0
+
+
#else
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ldf.fill f6=[r2],PT(F7)-PT(F6)
;;
ldf.fill f7=[r2],PT(F11)-PT(F7)
+#ifdef XEN
+ ldf.fill f8=[r3],PT(R5)-PT(F8)
+ ;;
+ ldf.fill f11=[r2],PT(R4)-PT(F11)
+ mov ar.ccv=r15
+ ;;
+ ld8.fill r4=[r2],16
+ ld8.fill r5=[r3],16
+ ;;
+ ld8.fill r6=[r2]
+ ld8.fill r7=[r3]
+ ;;
+ srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
+ ;;
+ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
+ ;;
+#else
ldf.fill f8=[r3],32
;;
srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
ldf.fill f11=[r2]
bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
;;
+#endif
#ifdef XEN
(pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
(pUStk) ld8 r18=[r18]
#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
-
-#ifdef XEN
-#ifdef CONFIG_VTI
-#define PRED_EMUL 2 /* Need to save r4-r7 for inst emulation */
-#define PRED_NON_EMUL 3 /* No need to save r4-r7 for normal path */
-#define PRED_BN0 6 /* Guest is in bank 0 */
-#define PRED_BN1 7 /* Guest is in bank 1 */
-# define pEml PASTE(p,PRED_EMUL)
-# define pNonEml PASTE(p,PRED_NON_EMUL)
-# define pBN0 PASTE(p,PRED_BN0)
-# define pBN1 PASTE(p,PRED_BN1)
+#ifdef XEN
#define VPD(f) (VPD_##f##_START_OFFSET)
-#endif // CONFIG_VTI
#endif
#define PT_REGS_SAVES(off) \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
- stf.spill [r2]=f10; \
- stf.spill [r3]=f11; \
- adds r25=PT(B7)-PT(F11),r3; \
+ stf.spill [r2]=f10,32; \
+ stf.spill [r3]=f11,24; \
;; \
+.mem.offset 0,0; st8.spill [r2]=r4,16; \
+.mem.offset 8,0; st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r6,16; \
+.mem.offset 8,0; st8.spill [r3]=r7; \
+ adds r25=PT(B7)-PT(R7),r3; \
+ ;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
+ mov r26=ar.unat; \
+ ;; \
st8 [r25]=r10; /* ar.ssd */ \
- ;;
+ st8 [r2]=r26; /* eml_unat */ \
+ ;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
RPT(r1), RPT(r2), RPT(r3),
-#if defined(XEN) && defined(CONFIG_VTI)
+//#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
RPT(r4), RPT(r5), RPT(r6), RPT(r7),
#else //CONFIG_VTI
RSW(r4), RSW(r5), RSW(r6), RSW(r7),
return reg;
}
-#if defined(XEN) && defined(CONFIG_VTI)
+//#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
void
set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
{
bspstore = ia64_get_bspstore();
}
*val=*addr;
- if(bspstore < rnat_addr){
- *nat=!!(ia64_get_rnat()&nat_mask);
- }else{
- *nat = !!((*rnat_addr)&nat_mask);
+ if(nat){
+ if(bspstore < rnat_addr){
+ *nat=!!(ia64_get_rnat()&nat_mask);
+ }else{
+ *nat = !!((*rnat_addr)&nat_mask);
+ }
+ ia64_set_rsc(old_rsc);
}
- ia64_set_rsc(old_rsc);
}
#else // CONFIG_VTI
size=(inst.M1.x6&0x3);
if((inst.M1.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
+ vcpu_get_gr_nat(vcpu,inst.M4.r2,&data);
}else if((inst.M1.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M1.r1,&value);
}
}
// Integer Load + Reg update
inst_type = SL_INTEGER;
        dir = IOREQ_READ; //read
size = (inst.M2.x6&0x3);
- vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
- vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
- vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
+ vcpu_get_gr_nat(vcpu,inst.M2.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M2.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M2.r2,&post_update);
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
}
// Integer Load/Store + Imm update
else if(inst.M3.major==5){
size=(inst.M3.x6&0x3);
if((inst.M5.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
- vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M5.r2,&data);
+ vcpu_get_gr_nat(vcpu,inst.M5.r3,&temp);
post_update = (inst.M5.i<<7)+inst.M5.imm7;
if(inst.M5.s)
temp -= post_update;
else
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
}else if((inst.M3.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
- vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M3.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M3.r3,&temp);
post_update = (inst.M3.i<<7)+inst.M3.imm7;
if(inst.M3.s)
temp -= post_update;
else
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
}
}
data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
if(inst_type==SL_INTEGER){ //gp
- vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
+ vcpu_set_gr(vcpu,inst.M1.r1,data,0);
}else{
panic("Don't support ldfd now !");
/* switch(inst.M6.f1){
get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
UINT64 *gr30, UINT64 *gr31) {
- vmx_vcpu_get_gr(vcpu,29,gr29);
- vmx_vcpu_get_gr(vcpu,30,gr30);
- vmx_vcpu_get_gr(vcpu,31,gr31);
+ vcpu_get_gr_nat(vcpu,29,gr29);
+ vcpu_get_gr_nat(vcpu,30,gr30);
+ vcpu_get_gr_nat(vcpu,31,gr31);
}
static void
set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
- vmx_vcpu_set_gr(vcpu,8, result.status,0);
- vmx_vcpu_set_gr(vcpu,9, result.v0,0);
- vmx_vcpu_set_gr(vcpu,10, result.v1,0);
- vmx_vcpu_set_gr(vcpu,11, result.v2,0);
+ vcpu_set_gr(vcpu,8, result.status,0);
+ vcpu_set_gr(vcpu,9, result.v0,0);
+ vcpu_set_gr(vcpu,10, result.v1,0);
+ vcpu_set_gr(vcpu,11, result.v2,0);
}
struct ia64_pal_retval result;
get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
- vmx_vcpu_get_gr(vcpu,28,&gr28);
+ vcpu_get_gr_nat(vcpu,28,&gr28);
/* Always call Host Pal in int=1 */
gr30 = gr30 &(~(0x2UL));
struct ia64_pal_retval result;
- vmx_vcpu_get_gr(vcpu,28,&gr28); //bank1
+ vcpu_get_gr_nat(vcpu,28,&gr28); //bank1
switch (gr28) {
case PAL_CACHE_FLUSH:
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
regs=vcpu_regs(vcpu);
- pt_isr.val=regs->cr_isr;
+ pt_isr.val=VMX(vcpu,cr_isr);
visr.val=0;
visr.ei=pt_isr.ei;
visr.ir=pt_isr.ir;
;;
adds r21=PT(PR)+16,r12
;;
-
lfetch [r21],PT(CR_IPSR)-PT(PR)
adds r2=PT(B6)+16,r12
adds r3=PT(R16)+16,r12
;;
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
+ adds r7 = PT(EML_UNAT)+16,r12
;;
+ ld8 r7 = [r7]
br.call.sptk.many b0=leave_hypervisor_tail
;;
mov ar.pfs=loc0
- adds r8=IA64_VPD_BASE_OFFSET,r13
- ;;
- ld8 r8=[r8]
- ;;
- adds r9=VPD(VPSR),r8
- ;;
- ld8 r9=[r9]
- ;;
- tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT
- ;;
-(pBN0) add r7=VPD(VBNAT),r8;
-(pBN1) add r7=VPD(VNAT),r8;
- ;;
- ld8 r7=[r7]
- ;;
mov ar.unat=r7
-(pBN0) add r4=VPD(VBGR),r8;
-(pBN1) add r4=VPD(VGR),r8;
-(pBN0) add r5=VPD(VBGR)+0x8,r8;
-(pBN1) add r5=VPD(VGR)+0x8,r8;
- ;;
- ld8.fill r16=[r4],16
- ld8.fill r17=[r5],16
- ;;
- ld8.fill r18=[r4],16
- ld8.fill r19=[r5],16
- ;;
- ld8.fill r20=[r4],16
- ld8.fill r21=[r5],16
- ;;
- ld8.fill r22=[r4],16
- ld8.fill r23=[r5],16
- ;;
- ld8.fill r24=[r4],16
- ld8.fill r25=[r5],16
- ;;
- ld8.fill r26=[r4],16
- ld8.fill r27=[r5],16
- ;;
- ld8.fill r28=[r4],16
- ld8.fill r29=[r5],16
- ;;
- ld8.fill r30=[r4],16
- ld8.fill r31=[r5],16
- ;;
- bsw.0
- ;;
- mov r18=r8 //vpd
- mov r19=r9 //vpsr
adds r20=PT(PR)+16,r12
;;
lfetch [r20],PT(CR_IPSR)-PT(PR)
- adds r16=PT(B6)+16,r12
- adds r17=PT(B7)+16,r12
+ adds r2 = PT(B6)+16,r12
+ adds r3 = PT(B7)+16,r12
;;
lfetch [r20]
- mov r21=r13 // get current
;;
- ld8 r30=[r16],16 // load b6
- ld8 r31=[r17],16 // load b7
- add r20=PT(EML_UNAT)+16,r12
+ ld8 r24=[r2],16 /* B6 */
+ ld8 r25=[r3],16 /* B7 */
;;
- ld8 r29=[r20] //load ar_unat
- mov b6=r30
- mov b7=r31
- ld8 r30=[r16],16 //load ar_csd
- ld8 r31=[r17],16 //load ar_ssd
+ ld8 r26=[r2],16 /* ar_csd */
+ ld8 r27=[r3],16 /* ar_ssd */
+ mov b6 = r24
;;
- mov ar.unat=r29
- mov ar.csd=r30
- mov ar.ssd=r31
+ ld8.fill r8=[r2],16
+ ld8.fill r9=[r3],16
+ mov b7 = r25
;;
- ld8.fill r8=[r16],16 //load r8
- ld8.fill r9=[r17],16 //load r9
+ mov ar.csd = r26
+ mov ar.ssd = r27
;;
- ld8.fill r10=[r16],PT(R1)-PT(R10) //load r10
- ld8.fill r11=[r17],PT(R12)-PT(R11) //load r11
+ ld8.fill r10=[r2],PT(R15)-PT(R10)
+ ld8.fill r11=[r3],PT(R14)-PT(R11)
;;
- ld8.fill r1=[r16],16 //load r1
- ld8.fill r12=[r17],16 //load r12
+ ld8.fill r15=[r2],PT(R16)-PT(R15)
+ ld8.fill r14=[r3],PT(R17)-PT(R14)
;;
- ld8.fill r13=[r16],16 //load r13
- ld8 r30=[r17],16 //load ar_fpsr
+ ld8.fill r16=[r2],16
+ ld8.fill r17=[r3],16
;;
- ld8.fill r15=[r16],16 //load r15
- ld8.fill r14=[r17],16 //load r14
- mov ar.fpsr=r30
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
;;
- ld8.fill r2=[r16],16 //load r2
- ld8.fill r3=[r17],16 //load r3
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
;;
-/*
-(pEml) ld8.fill r4=[r16],16 //load r4
-(pEml) ld8.fill r5=[r17],16 //load r5
+ ld8.fill r22=[r2],16
+ ld8.fill r23=[r3],16
;;
-(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6
-(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7
+ ld8.fill r24=[r2],16
+ ld8.fill r25=[r3],16
;;
-(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16
-(pNonEml) adds r17=PT(F7)-PT(R5),r17
+ ld8.fill r26=[r2],16
+ ld8.fill r27=[r3],16
;;
-*/
- ld8.fill r4=[r16],16 //load r4
- ld8.fill r5=[r17],16 //load r5
- ;;
- ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6
- ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7
+ ld8.fill r28=[r2],16
+ ld8.fill r29=[r3],16
+ ;;
+ ld8.fill r30=[r2],PT(F6)-PT(R30)
+ ld8.fill r31=[r3],PT(F7)-PT(R31)
;;
-
- ld8 r30=[r16],PT(F6)-PT(AR_CCV)
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
;;
- srlz.i // ensure interruption collection is off
+ ldf.fill f6=[r2],32
+ ldf.fill f7=[r3],32
;;
- invala // invalidate ALAT
+ ldf.fill f8=[r2],32
+ ldf.fill f9=[r3],32
;;
- ldf.fill f6=[r16],32
- ldf.fill f7=[r17],32
+ ldf.fill f10=[r2],32
+ ldf.fill f11=[r3],24
;;
- ldf.fill f8=[r16],32
- ldf.fill f9=[r17],32
+ ld8.fill r4=[r2],16 //load r4
+ ld8.fill r5=[r3],16 //load r5
;;
- ldf.fill f10=[r16]
- ldf.fill f11=[r17]
+ ld8.fill r6=[r2] //load r6
+ ld8.fill r7=[r3] //load r7
;;
- mov ar.ccv=r30
- adds r16=PT(CR_IPSR)-PT(F10),r16
- adds r17=PT(CR_IIP)-PT(F11),r17
+ srlz.i // ensure interruption collection is off
+ ;;
+ bsw.0
+ ;;
+ adds r16 = PT(CR_IPSR)+16,r12
+ adds r17 = PT(CR_IIP)+16,r12
+ mov r21=r13 // get current
;;
ld8 r31=[r16],16 // load cr.ipsr
ld8 r30=[r17],16 // load cr.iip
ld8 r27=[r16],16 // load ar.pfs
ld8 r26=[r17],16 // load ar.rsc
;;
- ld8 r25=[r16],16 // load ar.rnat (may be garbage)
- ld8 r24=[r17],16// load ar.bspstore (may be garbage)
+ ld8 r25=[r16],16 // load ar.rnat
+ ld8 r24=[r17],16 // load ar.bspstore
;;
ld8 r23=[r16],16 // load predicates
- ld8 r22=[r17],PT(RFI_PFS)-PT(B0) // load b0
+ ld8 r22=[r17],16 // load b0
;;
ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 //load r1
+ ;;
+ ld8.fill r12=[r16],16 //load r12
+ ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
+ ;;
+ ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
+ ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
+ ;;
+ ld8.fill r3=[r16] //load r3
+ ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) //load ar_ccv
+ ;;
+ mov ar.fpsr=r19
+ mov ar.ccv=r18
;;
//rbs_switch
// loadrs has already been shifted
;;
vmx_dorfirfi_back:
mov ar.pfs=r27
-
+ adds r18=IA64_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18] //vpd
+ ;;
+ adds r19=VPD(VPSR),r18
+ ;;
+ ld8 r19=[r19] //vpsr
//vsa_sync_write_start
movl r20=__vsa_base
;;
void hyper_not_support(void)
{
VCPU *vcpu=current;
- vmx_vcpu_set_gr(vcpu, 8, -1, 0);
+ vcpu_set_gr(vcpu, 8, -1, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 r32,r33,r34,r35,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
- vmx_vcpu_get_gr(vcpu,19,&r35);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,19,&r35);
ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
panic("PREEMPT happen in multicall\n"); // Not support yet
} else {
- vmx_vcpu_set_gr(vcpu, 15, op, 0);
+ vcpu_set_gr(vcpu, 15, op, 0);
for ( i = 0; i < nr_args; i++) {
switch (i) {
- case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
+ case 0: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
break;
- case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
+ case 1: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
break;
- case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
+ case 2: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
break;
- case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
+ case 3: vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
break;
- case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
+ case 4: vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
break;
default: panic("Too many args for hypercall continuation\n");
break;
VCPU *vcpu=current;
u64 r32,r33,r34,r35,r36;
u64 ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
- vmx_vcpu_get_gr(vcpu,19,&r35);
- vmx_vcpu_get_gr(vcpu,20,&r36);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,19,&r35);
+ vcpu_get_gr_nat(vcpu,20,&r36);
// ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
ret = 0;
printf("do_dom_mem return value: %lx\n", ret);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
/* Hard to define a special return value to indicate hypercall restart.
* So just add a new mark, which is SMP safe
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_sched_op(r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_dom0_op((dom0_op_t *)r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_event_channel_op((evtchn_op_t *)r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_xen_version((int )r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
//TODO:
VCPU *vcpu=current;
u64 va,lock, ret;
- vmx_vcpu_get_gr(vcpu,16,&va);
- vmx_vcpu_get_gr(vcpu,17,&lock);
+ vcpu_get_gr_nat(vcpu,16,&va);
+ vcpu_get_gr_nat(vcpu,17,&lock);
ret=do_lock_page(vcpu, va, lock);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 gpa,ret;
- vmx_vcpu_get_gr(vcpu,16,&gpa);
+ vcpu_get_gr_nat(vcpu,16,&gpa);
ret=do_set_shared_page(vcpu, gpa);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
{
VCPU *vcpu=current;
u64 r32,r33,r34,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
}
*/
IA64_PSR vpsr;
REGS * regs = vcpu_regs(vcpu);
vpsr.val = vmx_vcpu_get_psr(vcpu);
-
+ vcpu_bsw0(vcpu);
if(vpsr.ic){
- extern void vmx_dorfirfi(void);
- if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
- panic("COLLECT interruption for vmx_dorfirfi\n");
/* Sync mpsr id/da/dd/ss/ed bits to vipsr
     * since after the guest does rfi, we still want these bits on in
vifs &= ~IA64_IFS_V;
vcpu_set_ifs(vcpu, vifs);
- vcpu_set_iipa(vcpu, regs->cr_iipa);
+ vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
}
vdcr = VCPU(vcpu,dcr);
vmx_vcpu_set_psr(vcpu, vpsr.val);
}
+
int
inject_guest_interruption(VCPU *vcpu, u64 vec)
{
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(vmx_virtualization_fault)
- VMX_DBG_FAULT(37)
mov r31=pr
mov r19=37
+ adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
+ adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
br.sptk vmx_dispatch_virtualization_fault
END(vmx_virtualization_fault)
* r31: contains saved predicates (pr)
*/
VMX_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,4,0
+ alloc r14=ar.pfs,0,0,5,0
mov out0=cr.ifa
mov out1=cr.isr
mov out2=cr.iim
mov out3=r15
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out4=16,r12
br.call.sptk.many b6=vmx_reflect_interruption
END(vmx_dispatch_reflection)
ENTRY(vmx_dispatch_virtualization_fault)
VMX_SAVE_MIN_WITH_COVER_R19
;;
- alloc r14=ar.pfs,0,0,3,0 // now it's safe (must be first in insn group!)
+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
mov out0=r13 //vcpu
- mov out1=r4 //cause
- mov out2=r5 //opcode
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out1=16,sp //regs
br.call.sptk.many b6=vmx_emulate
END(vmx_dispatch_virtualization_fault)
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
+ adds r3=8,r2 // set up second base pointer
;;
VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
ENTRY(vmx_dispatch_tlb_miss)
VMX_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,3,0
- mov out0=r13
+ mov out0=cr.ifa
mov out1=r15
- mov out2=cr.ifa
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out2=16,r12
br.call.sptk.many b6=vmx_hpw_miss
END(vmx_dispatch_tlb_miss)
;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
mov out0=cr.ifa
- adds out1=16,sp
mov out2=cr.isr // FIXME: pity to make this slow access twice
mov out3=cr.iim // FIXME: pity to make this slow access twice
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15)ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out1=16,sp
br.call.sptk.many b6=vmx_ia64_handle_break
;;
END(vmx_dispatch_break_fault)
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
+ adds r3=8,r2 // set up second base pointer
;;
VMX_SAVE_REST
;;
;;
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
mov out0=cr.ivr // pass cr.ivr as first arg
- add out1=16,sp // pass pointer to pt_regs as second arg
-
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
ssm psr.ic
;;
srlz.i
;;
(p15) ssm psr.i
- adds r3=16,r2 // set up second base pointer for SAVE_REST
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ add out1=16,sp // pass pointer to pt_regs as second arg
br.call.sptk.many b6=vmx_ia64_handle_irq
END(vmx_dispatch_interrupt)
ld8 r25=[r25]; /* read vpd base */ \
ld8 r20=[r20]; /* read entry point */ \
;; \
- mov r6=r25; \
add r20=PAL_VPS_SYNC_READ,r20; \
;; \
{ .mii; \
br.cond.sptk b0; /* call the service */ \
;; \
}; \
- ld8 r7=[r22]; \
+ ld8 r17=[r22]; \
    /* deposit ipsr cpl bits into vpd.vpsr, since epc will change */ \
extr.u r30=r16, IA64_PSR_CPL0_BIT, 2; \
;; \
- dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2; \
- ;; \
+ dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2; \
extr.u r30=r16, IA64_PSR_BE_BIT, 5; \
;; \
- dep r7=r30, r7, IA64_PSR_BE_BIT, 5; \
- ;; \
+ dep r17=r30, r17, IA64_PSR_BE_BIT, 5; \
extr.u r30=r16, IA64_PSR_RI_BIT, 2; \
;; \
- dep r7=r30, r7, IA64_PSR_RI_BIT, 2; \
+ dep r17=r30, r17, IA64_PSR_RI_BIT, 2; \
;; \
- st8 [r22]=r7; \
+ st8 [r22]=r17; \
;;
VMX_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
- mov r26=ar.unat; /* M */ \
+ mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
+ mov r26=ar.pfs; /* I */ \
mov r18=cr.isr; \
COVER; /* B;; (or nothing) */ \
;; \
tbit.z p6,p0=r29,IA64_PSR_VM_BIT; \
+ ;; \
tbit.nz.or p6,p0 = r18,39; \
;; \
(p6) br.sptk.few vmx_panic; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
- mov r8=ar.pfs; /* I */ \
mov r9=cr.iip; /* M */ \
mov r10=ar.fpsr; /* M */ \
;; \
st8 [r17]=r30,16; /* save cr.ifs */ \
sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
;; \
- st8 [r16]=r26,16; /* save ar.unat */ \
- st8 [r17]=r8,16; /* save ar.pfs */ \
+ st8 [r16]=r25,16; /* save ar.unat */ \
+ st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
- adds r2=PT(F6),r1; \
- ;; \
- .mem.offset 0,0; st8.spill [r16]=r4,16; \
- .mem.offset 8,0; st8.spill [r17]=r5,16; \
- ;; \
- .mem.offset 0,0; st8.spill [r16]=r6,16; \
- .mem.offset 8,0; st8.spill [r17]=r7,16; \
- mov r20=ar.ccv; \
- ;; \
- mov r18=cr.iipa; \
- mov r4=cr.isr; \
- mov r22=ar.unat; \
- ;; \
- st8 [r16]=r18,16; \
- st8 [r17]=r4; \
- ;; \
- adds r16=PT(EML_UNAT),r1; \
- adds r17=PT(AR_CCV),r1; \
- ;; \
- st8 [r16]=r22,8; \
- st8 [r17]=r20; \
- mov r4=r24; \
- mov r5=r25; \
+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
- st8 [r16]=r0; \
+ adds r16=IA64_VCPU_IIPA_OFFSET,r13; \
+ adds r17=IA64_VCPU_ISR_OFFSET,r13; \
+ mov r26=cr.iipa; \
+ mov r27=cr.isr; \
+ ;; \
+ st8 [r16]=r26; \
+ st8 [r17]=r27; \
+ ;; \
EXTRA; \
+ mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
* psr.ic: on
* r2: points to &pt_regs.f6
* r3: points to &pt_regs.f7
- * r4,r5,scrach
- * r6: points to vpd
- * r7: vpsr
+ * r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define VMX_SAVE_REST \
- tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT; /* guest bank0 or bank1 ? */ \
- ;; \
-(pBN0) add r4=VPD(VBGR),r6; \
-(pBN0) add r5=VPD(VBGR)+0x8,r6; \
-(pBN0) add r7=VPD(VBNAT),r6; \
- ;; \
-(pBN1) add r5=VPD(VGR)+0x8,r6; \
-(pBN1) add r4=VPD(VGR),r6; \
-(pBN1) add r7=VPD(VNAT),r6; \
- ;; \
-.mem.offset 0,0; st8.spill [r4]=r16,16; \
-.mem.offset 8,0; st8.spill [r5]=r17,16; \
+.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r18,16; \
-.mem.offset 8,0; st8.spill [r5]=r19,16; \
+.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r20,16; \
-.mem.offset 8,0; st8.spill [r5]=r21,16; \
+.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 8,0; st8.spill [r3]=r21,16; \
+ mov r18=b6; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r22,16; \
-.mem.offset 8,0; st8.spill [r5]=r23,16; \
+.mem.offset 0,0; st8.spill [r2]=r22,16; \
+.mem.offset 8,0; st8.spill [r3]=r23,16; \
+ mov r19=b7; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r24,16; \
-.mem.offset 8,0; st8.spill [r5]=r25,16; \
+.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r26,16; \
-.mem.offset 8,0; st8.spill [r5]=r27,16; \
+.mem.offset 0,0; st8.spill [r2]=r26,16; \
+.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r28,16; \
-.mem.offset 8,0; st8.spill [r5]=r29,16; \
- mov r26=b6; \
+.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r30,16; \
-.mem.offset 8,0; st8.spill [r5]=r31,16; \
- mov r27=b7; \
+.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
- mov r30=ar.unat; \
- ;; \
- st8 [r7]=r30; \
- mov ar.fpsr=r11; /* M-unit */ \
+ mov ar.fpsr=r11; \
+ st8 [r2]=r8,8; \
+ adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
- stf.spill [r2]=f10; \
- stf.spill [r3]=f11; \
+ stf.spill [r2]=f10,32; \
+ stf.spill [r3]=f11,24; \
;; \
- adds r2=PT(B6)-PT(F10),r2; \
- adds r3=PT(B7)-PT(F11),r3; \
- ;; \
- st8 [r2]=r26,16; /* b6 */ \
- st8 [r3]=r27,16; /* b7 */ \
+.mem.offset 0,0; st8.spill [r2]=r4,16; \
+.mem.offset 8,0; st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r6,16; \
+.mem.offset 8,0; st8.spill [r3]=r7; \
+ adds r25=PT(B7)-PT(R7),r3; \
;; \
- st8 [r2]=r9; /* ar.csd */ \
- st8 [r3]=r10; /* ar.ssd */ \
+ st8 [r24]=r18,16; /* b6 */ \
+ st8 [r25]=r19,16; /* b7 */ \
+ ;; \
+ st8 [r24]=r9; /* ar.csd */ \
+ mov r26=ar.unat; \
+ ;; \
+ st8 [r25]=r10; /* ar.ssd */ \
+ st8 [r2]=r26; /* eml_unat */ \
;;
#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
first_time = 0;
}
if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
- if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
- else do_ssc(vcpu_get_gr(current,36), regs);
+ if (running_on_sim) do_ssc(vcpu_get_gr_nat(current,36), regs);
+ else do_ssc(vcpu_get_gr_nat(current,36), regs);
}
#endif
if (iim == d->arch.breakimm) {
break;
case FW_HYPERCALL_SAL_CALL:
for (i = 0; i < 8; i++)
- vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
+ vcpu_get_gr_nat(v, 32+i, &sal_param[i]);
x = sal_emulator(sal_param[0], sal_param[1],
sal_param[2], sal_param[3],
sal_param[4], sal_param[5],
case FW_HYPERCALL_EFI_GET_TIME:
{
unsigned long *tv, *tc;
- vmx_vcpu_get_gr(v, 32, &tv);
- vmx_vcpu_get_gr(v, 33, &tc);
+ vcpu_get_gr_nat(v, 32, &tv);
+ vcpu_get_gr_nat(v, 33, &tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
tv = __va(translate_domain_mpaddr(tv));
if (tc) tc = __va(translate_domain_mpaddr(tc));
pal_emul(current);
vmx_vcpu_increment_iip(current);
} else
- vmx_reflect_interruption(ifa,isr,iim,11);
+ vmx_reflect_interruption(ifa,isr,iim,11,regs);
}
static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
- UINT64 vector)
+ UINT64 vector,REGS *regs)
{
VCPU *vcpu = current;
- REGS *regs=vcpu_regs(vcpu);
UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
panic("Guest nested fault!");
inject_guest_interruption(vcpu, vector);
}
+
+void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
+{
+    unsigned long i, *src, *dst, *sunat, *dunat;
+    IA64_PSR vpsr;
+    src=&regs->r16;
+    sunat=&regs->eml_unat;
+ vpsr.val = vmx_vcpu_get_psr(v);
+ if(vpsr.bn){
+ dst = &VCPU(v, vgr[0]);
+ dunat =&VCPU(v, vnat);
+        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;\n"
+                "dep %2 = %0, %2, 0, 16;;\n"
+                "st8 [%3] = %2;;"
+                ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
+
+ }else{
+ dst = &VCPU(v, vbgr[0]);
+// dunat =&VCPU(v, vbnat);
+// __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
+// dep %2 = %0, %2, 16, 16;;
+// st8 [%3] = %2;;"
+// ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
+
+ }
+ for(i=0; i<16; i++)
+ *dst++ = *src++;
+}
+
+
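
save_banked_regs_to_vpd() leans on the same st8.spill property as the
IA64_PT_REGS_R16_SLOT DEFINE: spilling a register to address A sets bit
A{8:3} of ar.unat, and since r16-r31 occupy sixteen consecutive 8-byte slots
in pt_regs, their NaT bits form a contiguous 16-bit field of eml_unat
starting at that slot. A sketch of the slot math, assuming the pt_regs frame
ends on a 512-byte-aligned boundary:

    #include <stddef.h>

    /* ar.unat bit touched by st8.spill of the field at field_off; valid
     * when the end of pt_regs is 512-byte aligned, so the spill address
     * mod 512 depends only on the field offset within the structure */
    static inline unsigned unat_slot_of(size_t field_off, size_t regs_size)
    {
            return (unsigned)(((field_off - regs_size) >> 3) & 0x3f);
    }
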
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
if (!is_idle_task(d) ) { // always comes from guest
extern void vmx_dorfirfi(void);
struct pt_regs *user_regs = vcpu_regs(current);
-
if (local_softirq_pending())
do_softirq();
local_irq_disable();
VCPU(v, irr[0]) |= 1UL << 0x10;
v->arch.irq_new_pending = 1;
}
-
+
if ( v->arch.irq_new_pending ) {
v->arch.irq_new_pending = 0;
vmx_check_pending_irq(v);
}
+// if (VCPU(v,vac).a_bsw){
+// save_banked_regs_to_vpd(v,regs);
+// }
+
}
}
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
/* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
+void vmx_hpw_miss(u64 vadr, u64 vec, REGS *regs)
{
IA64_PSR vpsr;
CACHE_LINE_TYPE type;
-    REGS *regs;
thash_cb_t *vtlb, *vhpt;
thash_data_t *data, me;
- vtlb=vmx_vcpu_get_vtlb(vcpu);
+ VCPU *v = current;
+ vtlb=vmx_vcpu_get_vtlb(v);
#ifdef VTLB_DEBUG
check_vtlb_sanity(vtlb);
dump_vtlb(vtlb);
#endif
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- regs = vcpu_regs(vcpu);
- misr.val=regs->cr_isr;
+ vpsr.val = vmx_vcpu_get_psr(v);
+ misr.val=VMX(v,cr_isr);
+
/* TODO
- if(vcpu->domain->id && vec == 2 &&
+ if(v->domain->id && vec == 2 &&
vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
emulate_ins(&v);
return;
*/
if((vec==1)&&(!vpsr.it)){
- physical_itlb_miss(vcpu, vadr);
+ physical_itlb_miss(v, vadr);
return;
}
if((vec==2)&&(!vpsr.dt)){
- if(vcpu->domain!=dom0&&__gpfn_is_io(vcpu->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
- emulate_io_inst(vcpu,((vadr<<1)>>1),4); // UC
+ if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
+ emulate_io_inst(v,((vadr<<1)>>1),4); // UC
}else{
- physical_dtlb_miss(vcpu, vadr);
+ physical_dtlb_miss(v, vadr);
}
return;
}
- vrr = vmx_vcpu_rr(vcpu,vadr);
+ vrr = vmx_vcpu_rr(v, vadr);
if(vec == 1) type = ISIDE_TLB;
else if(vec == 2) type = DSIDE_TLB;
else panic("wrong vec\n");
-// prepare_if_physical_mode(vcpu);
+// prepare_if_physical_mode(v);
if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
- if(vcpu->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(vcpu->domain, data->ppn>>(PAGE_SHIFT-12))){
+ if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){
vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
- emulate_io_inst(vcpu, vadr, data->ma);
+ emulate_io_inst(v, vadr, data->ma);
return IA64_FAULT;
}
if ( data->ps != vrr.ps ) {
- machine_tlb_insert(vcpu, data);
+ machine_tlb_insert(v, data);
}
else {
thash_insert(vtlb->ts->vhpt,data,vadr);
}
}else if(type == DSIDE_TLB){
- if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
+ if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- alt_dtlb(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ alt_dtlb(v, vadr);
return IA64_FAULT;
} else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
} else{
- vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
- vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
+ vmx_vcpu_thash(v, vadr, &vhpt_adr);
+ vrr=vmx_vcpu_rr(v,vhpt_adr);
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- dtlb_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
return IA64_FAULT;
}else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
}else{
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- dvhpt_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ dvhpt_fault(v, vadr);
return IA64_FAULT;
}else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
}
}
}else if(type == ISIDE_TLB){
- if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
+ if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- alt_itlb(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ alt_itlb(v, vadr);
return IA64_FAULT;
} else{
- vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
- vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
+ vmx_vcpu_thash(v, vadr, &vhpt_adr);
+ vrr=vmx_vcpu_rr(v,vhpt_adr);
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- itlb_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ itlb_fault(v, vadr);
return IA64_FAULT;
}else{
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- ivhpt_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ ivhpt_fault(v, vadr);
return IA64_FAULT;
}
}
UINT64 ifs, psr;
REGS *regs = vcpu_regs(vcpu);
psr = VCPU(vcpu,ipsr);
+ vcpu_bsw1(vcpu);
vmx_vcpu_set_psr(vcpu,psr);
ifs=VCPU(vcpu,ifs);
if((ifs>>63)&&(ifs<<1)){
return VCPU(vcpu,vpsr);
}
-
+#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
return IA64_NO_FAULT;
}
-
-
+#endif
+#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
{
int nat;
//TODO, Eddie
if (!regs) return 0;
+#if 0
if (reg >= 16 && reg < 32) {
return vmx_vcpu_get_bgr(vcpu,reg,val);
}
+#endif
getreg(reg,val,&nat,regs); // FIXME: handle NATs later
if(nat){
return IA64_FAULT;
if (!regs) return IA64_ILLOP_FAULT;
if (reg >= sof + 32) return IA64_ILLOP_FAULT;
+#if 0
if ( reg >= 16 && reg < 32 ) {
return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
}
+#endif
setreg(reg,value,nat,regs);
return IA64_NO_FAULT;
}
+#endif
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
/*
if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
- return vmx_vcpu_set_gr(vcpu, tgt, val);
+ return vcpu_set_gr(vcpu, tgt, val);
else return fault;
*/
val = vmx_vcpu_get_psr(vcpu);
val = (val & MASK(0, 32)) | (val & MASK(35, 2));
last_guest_psr = val;
- return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
+ return vcpu_set_gr(vcpu, tgt, val, 0);
}
/**
{
UINT64 val;
IA64FAULT fault;
- if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
+ if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic(" get_psr nat bit fault\n");
val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
return IA64_FAULT;
}
#endif // CHECK_FAULT
- return vmx_vcpu_bsw0(vcpu);
+ return vcpu_bsw0(vcpu);
}
IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- return vmx_vcpu_bsw1(vcpu);
+ return vcpu_bsw1(vcpu);
}
IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
privilege_op (vcpu);
return IA64_FAULT;
}
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
- ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
+ ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
+ ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
#ifdef VMAL_NO_FAULT_CHECK
if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
set_isr_reg_nat_consumption(vcpu,0,0);
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
#endif //CHECK_FAULT
}
#ifdef CHECK_FAULT
if(unimplemented_gva(vcpu, r3)){
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
vmx_vcpu_thash(vcpu, r3, &r1);
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
#endif //CHECK_FAULT
}
#ifdef CHECK_FAULT
if(unimplemented_gva(vcpu, r3)){
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
vmx_vcpu_ttag(vcpu, r3, &r1);
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,1);
rnat_comsumption(vcpu);
if(vmx_vcpu_tpa(vcpu, r3, &r1)){
return IA64_FAULT;
}
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
}
#endif
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,1);
rnat_comsumption(vcpu);
if(vmx_vcpu_tak(vcpu, r3, &r1)){
return IA64_FAULT;
}
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pte);
+ ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
#ifdef VMAL_NO_FAULT_CHECK
if( ret1 != IA64_NO_FAULT ){
set_isr_reg_nat_consumption(vcpu,0,0);
if(inst.M29.ar3!=44){
panic("Can't support ar register other than itc");
}
- if(vmx_vcpu_get_gr(vcpu,inst.M29.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
#endif // CHECK_FAULT
u64 r1;
vmx_vcpu_get_itc(vcpu,&r1);
- vmx_vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
+ vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
return IA64_NO_FAULT;
}
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_rr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_pkr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_dbr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_ibr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_pmc(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
}
#endif //CHECK_FAULT
vmx_vcpu_get_cpuid(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M32.r2, &r2)){
+ if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
#define cr_get(cr) \
((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+ vcpu_set_gr(vcpu, tgt, val,0):fault;
#define vmx_cr_get(cr) \
((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+ vcpu_set_gr(vcpu, tgt, val,0):fault;
IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
case 64:return vmx_cr_get(lid);
case 65:
vmx_vcpu_get_ivr(vcpu,&val);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ return vcpu_set_gr(vcpu,tgt,val,0);
case 66:return vmx_cr_get(tpr);
- case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
+ case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
case 68:return vmx_cr_get(irr0);
case 69:return vmx_cr_get(irr1);
case 70:return vmx_cr_get(irr2);
*/
void
-vmx_emulate(VCPU *vcpu, UINT64 cause, UINT64 opcode)
+vmx_emulate(VCPU *vcpu, REGS *regs)
{
IA64_BUNDLE bundle;
int slot;
IA64_SLOT_TYPE slot_type;
IA64FAULT status;
INST64 inst;
- REGS * regs;
- UINT64 iip;
- regs = vcpu_regs(vcpu);
+ UINT64 iip, cause, opcode;
iip = regs->cr_iip;
IA64_PSR vpsr;
+ cause = VMX(vcpu,cause);
+ opcode = VMX(vcpu,opcode);
+
/*
if (privop_trace) {
static long i = 400;
#else
inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
- vcpu_set_regs(vcpu, regs);
/*
* Switch to actual virtual rid in rr0 and rr4,
* which is required by some tlb related instructions.
*cch = *hash_table;
*hash_table = vhpt_entry;
hash_table->next = cch;
+ if(hash_table->tag==hash_table->next->tag)
+ while(1);
+
}
- if(hash_table->tag==hash_table->next->tag)
- while(1);
}
return /*hash_table*/;
}
#ifdef CONFIG_VTI
vmx_init_all_rr(v);
if (d == dom0)
- VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+// VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ regs->r28 = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
VCPU(v, vpsr) = IA64_PSR_BN;
VCPU(v, dcr) = 0;
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_break
+// br.sptk.many ia64_prepare_handle_break
+ br.call.sptk.many b6=ia64_handle_break
END(dispatch_break_fault)
#endif
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_privop
+// br.sptk.many ia64_prepare_handle_privop
+ br.call.sptk.many b6=ia64_handle_privop
END(dispatch_privop_fault)
#endif
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_unaligned
+// br.sptk.many ia64_prepare_handle_unaligned
+ br.call.sptk.many b6=ia64_handle_unaligned
END(dispatch_unaligned_handler)
.org ia64_ivt+0x4c00
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_reflection
+// br.sptk.many ia64_prepare_handle_reflection
+ br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)
#define SAVE_MIN_COVER_DONE DO_SAVE_MIN(,mov r30=cr.ifs,)
fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, padr);
+ return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
else return fault;
}
fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, key);
+ return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
else return fault;
}
if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
UINT64 val;
if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
- return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
+ return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
else return IA64_ILLOP_FAULT;
}
else {
if (inst.M43.r1 > 63) { // privified mov from cpuid
fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
}
else {
fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
}
return fault;
}
fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
if (inst.M43.r1 > 63) { // privified mov from pmd
fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
}
else {
fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
}
return fault;
}
#define cr_get(cr) \
((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
- vcpu_set_gr(vcpu, tgt, val) : fault;
+ vcpu_set_gr(vcpu, tgt, val, 0) : fault;
IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
{
case 64:return cr_get(lid);
case 65:return cr_get(ivr);
case 66:return cr_get(tpr);
- case 67:return vcpu_set_gr(vcpu,tgt,0L);
+ case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
case 68:return cr_get(irr0);
case 69:return cr_get(irr1);
case 70:return cr_get(irr2);
IA64FAULT fault;
if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, tgt, val);
+ return vcpu_set_gr(vcpu, tgt, val, 0);
else return fault;
}
break;
case SSC_GETCHAR:
retval = ia64_ssc(0,0,0,0,ssc);
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_WAIT_COMPLETION:
if (arg0) { // metaphysical address
/**/ retval = 0;
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_OPEN:
arg1 = vcpu_get_gr(current,33); // access rights
retval = ia64_ssc(arg0,arg1,0,0,ssc);
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_WRITE:
case SSC_READ:
req->len = last_count;
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
break;
case SSC_CONNECT_INTERRUPT:
(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
break;
case SSC_NETDEV_PROBE:
- vcpu_set_gr(current,8,-1L);
+ vcpu_set_gr(current,8,-1L,0);
break;
default:
printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",ssc,regs->cr_iip,regs->b0);
if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
panic("PREEMPT happen in multicall\n"); // Not support yet
} else {
- vcpu_set_gr(vcpu, 2, op);
+ vcpu_set_gr(vcpu, 2, op, 0);
for ( i = 0; i < nr_args; i++) {
switch (i) {
- case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long));
+ case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long), 0);
break;
- case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long));
+ case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long), 0);
break;
- case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long));
+ case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
break;
- case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long));
+ case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
break;
- case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long));
+ case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
break;
default: panic("Too many args for hypercall continuation\n");
break;
//typedef struct domain VCPU;
// this def for vcpu_regs won't work if kernel stack is present
-#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
+//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
+#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
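
The rewritten vcpu_regs() derives the pt_regs pointer from the vcpu pointer
itself rather than a stored arch.regs field. It assumes the register save
frame sits flush against the top of the IA64_STK_OFFSET-sized per-vcpu stack
block, in the task_struct/kernel-stack style; a sketch of the assumed layout:

    /*  (char *)v + IA64_STK_OFFSET -> +----------------+  <- block top
     *                                 |    pt_regs     |
     *  vcpu_regs(v)                -> +----------------+
     *                                 |  kernel stack  |
     *                                 ~      ...       ~
     *  (char *)v                   -> +----------------+  <- struct vcpu
     */
    static inline struct pt_regs *vcpu_regs_sketch(struct vcpu *v)
    {
            return ((struct pt_regs *)((char *)v + IA64_STK_OFFSET)) - 1;
    }

This alignment is also what lets IA64_PT_REGS_R16_SLOT be a compile-time
constant: with the block top aligned, the spill addresses' low bits are fixed
by the structure layout alone.
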
#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y
/**************************************************************************
VCPU general register access routines
**************************************************************************/
-
+#ifdef XEN
UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
REGS *regs = vcpu_regs(vcpu);
UINT64 val;
-
if (!reg) return 0;
getreg(reg,&val,0,regs); // FIXME: handle NATs later
return val;
}
+IA64FAULT
+vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ int nat;
+ getreg(reg,val,&nat,regs); // FIXME: handle NATs later
+ if(nat)
+ return IA64_NAT_CONSUMPTION_VECTOR;
+ return 0;
+}
+// returns:
+// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
+// IA64_NO_FAULT otherwise
+IA64FAULT
+vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ if (!reg) return IA64_ILLOP_FAULT;
+ long sof = (regs->cr_ifs) & 0x7f;
+ if (reg >= sof + 32) return IA64_ILLOP_FAULT;
+ setreg(reg,value,nat,regs); // FIXME: handle NATs later
+ return IA64_NO_FAULT;
+}
+#else
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
return IA64_NO_FAULT;
}
+#endif
/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
-//#ifdef CONFIG_VTI
if ( VMX_DOMAIN(vcpu) ) {
set_bit(vector,VCPU(vcpu,irr));
} else
-//#endif // CONFIG_VTI
{
/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {
vcpu_thash(vcpu, address, &iha);
if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
- return IA64_VHPT_TRANS_VECTOR;
+ return IA64_VHPT_FAULT;
/*
* Optimisation: this VHPT walker aborts on not-present pages
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
+#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
+do{ \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 0, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 16, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
+ "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
+}while(0)
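
A plain-C rendering of the vcpu_bsw0_unat sequence may help. On a switch to
bank 0, the sixteen NaT bits sitting in eml_unat at IA64_PT_REGS_R16_SLOT
belong to the bank 1 registers and get parked in vnat{15:0}; the bank 0 NaT
bits, parked in vbnat{31:16} by the matching bsw1 path, are pulled back into
eml_unat. A sketch, with slot standing for IA64_PT_REGS_R16_SLOT (the asm
form pins the extract/deposit pairs into explicit instruction groups):

    #include <stdint.h>

    static void bsw0_unat_sketch(uint64_t *b0unat, uint64_t *b1unat,
                                 uint64_t *runat, unsigned slot)
    {
            uint64_t nat16;

            /* park the bank-1 NaTs from eml_unat in vnat{15:0} */
            nat16   = (*runat >> slot) & 0xffffUL;
            *b1unat = (*b1unat & ~0xffffUL) | nat16;

            /* restore the bank-0 NaTs from vbnat{31:16} into eml_unat */
            nat16  = (*b0unat >> 16) & 0xffffUL;
            *runat = (*runat & ~(0xffffUL << slot)) | (nat16 << slot);
    }
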
IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
    unsigned long *r = &regs->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
- int i;
+    unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &PSCB(vcpu,vbnat);
+ unsigned long *b1unat = &PSCB(vcpu,vnat);
- if (PSCB(vcpu,banknum)) {
- for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
- PSCB(vcpu,banknum) = 0;
- }
+ unsigned long i;
+
+ if(VMX_DOMAIN(vcpu)){
+ if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
+ for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
+ vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
+ }
+ }else{
+ if (PSCB(vcpu,banknum)) {
+ for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
+ vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ PSCB(vcpu,banknum) = 0;
+ }
+ }
return (IA64_NO_FAULT);
}
+#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
+do{ \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 16, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 0, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
+ "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
+}while(0)
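vcpu_bsw1_unat is the mirror image of the macro above: the live unat bits now describe bank 0, so they are parked in b0unat[31:16], and the bank-1 bits are reloaded from b1unat[15:0] into the same r16 slot.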
+
IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
// TODO: Only allowed for current vcpu
	unsigned long *r = &regs->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
- int i;
+	unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &PSCB(vcpu,vbnat);
+ unsigned long *b1unat = &PSCB(vcpu,vnat);
- if (!PSCB(vcpu,banknum)) {
- for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
- PSCB(vcpu,banknum) = 1;
- }
+ unsigned long i;
+
+ if(VMX_DOMAIN(vcpu)){
+ if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
+ for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
+ vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ VCPU(vcpu,vpsr) |= IA64_PSR_BN;
+ }
+ }else{
+ if (!PSCB(vcpu,banknum)) {
+ for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
+ vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ PSCB(vcpu,banknum) = 1;
+ }
+ }
return (IA64_NO_FAULT);
}
#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-#define IA64_NO_FAULT 0x0001
+#define IA64_NO_FAULT 0x0000
+#define IA64_FAULT 0x0001
#define IA64_RFI_IN_PROGRESS 0x0002
#define IA64_RETRY 0x0003
-#ifdef CONFIG_VTI
-#undef IA64_NO_FAULT
-#define IA64_NO_FAULT 0x0000
-#define IA64_FAULT 0x0001
-#endif //CONFIG_VTI
#define IA64_FORCED_IFA 0x0004
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
#define IA64_PRIVOP_FAULT (IA64_GENEX_VECTOR | 0x10)
#define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40)
#define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80)
#define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR)
-
+#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x10)
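With IA64_NO_FAULT renumbered to zero, success becomes the natural falsy value, and synthetic codes such as IA64_VHPT_FAULT stay distinguishable from the bare translation vector. A small sketch of the resulting calling convention (handle_fault is hypothetical):

/* Hypothetical caller: zero now means success, so a simple
 * inequality test suffices; any nonzero IA64FAULT code is
 * reflected to the guest. */
IA64FAULT fault = vcpu_get_gr_nat(vcpu, reg, &val);
if (fault != IA64_NO_FAULT)
	handle_fault(vcpu, fault);	/* hypothetical reflection helper */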
#if !defined(__ASSEMBLY__)
typedef unsigned long IA64FAULT;
typedef unsigned long IA64INTVECTOR;
#define _XEN_IA64_PRIVOP_H
#include <asm/ia64_int.h>
-#ifdef CONFIG_VTI
+//#ifdef CONFIG_VTI
#include <asm/vmx_vcpu.h>
-#else //CONFIG_VTI
+//#else //CONFIG_VTI
#include <asm/vcpu.h>
-#endif //CONFIG_VTI
+//#endif //CONFIG_VTI
typedef unsigned long IA64_INST;
typedef union U_INST64_M35 {
IA64_INST inst;
struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
+
} INST64_M35;
typedef union U_INST64_M36 {
/* general registers */
extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
-extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value);
+extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val);
+extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat);
/* application registers */
extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
/* psr */
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
+#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
+#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
+#if 0
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
VCPU(vcpu,vpsr) |= IA64_PSR_BN;
return (IA64_NO_FAULT);
}
+#endif
#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
vtime_t vtm;
unsigned long vrr[8];
unsigned long vkr[8];
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long cause;
+ unsigned long opcode;
+
// unsigned long mrr5;
// unsigned long mrr6;
// unsigned long mrr7;
#define IA64_TR_VHPT 4 /* dtr4: vhpt */
#define IA64_TR_ARCH_INFO 5
-#ifdef CONFIG_VTI
-#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table in domain space */
-#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen image in domain space */
-#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch stub */
-#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest physical memory 256M */
-#endif // CONFIG_VTI
-
/* Processor status register bits: */
#define IA64_PSR_VM_BIT 46
#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
/* Define HV space hierarchy */
#define XEN_VIRT_SPACE_LOW 0xe800000000000000
#define XEN_VIRT_SPACE_HIGH 0xf800000000000000
-/* This is address to mapping rr7 switch stub, in region 5 */
-#ifdef CONFIG_VTI
-#define XEN_RR7_SWITCH_STUB 0xb700000000000000
-#endif // CONFIG_VTI
#define XEN_START_ADDR 0xf000000000000000
#define HYPERVISOR_VIRT_START 0xf000000000000000
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
-
- union {
- struct {
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
- };
- struct {
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
- };
- };
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
struct pt_fpreg f9; /* scratch */
struct pt_fpreg f10; /* scratch */
struct pt_fpreg f11; /* scratch */
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+    unsigned long eml_unat;    /* used for instruction emulation */
+    unsigned long rfi_pfs;     /* used for emulating rfi */
+
}cpu_user_regs_t;
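Moving r4-r7 and the emulation fields out of the union gives them fixed offsets, which is what makes a compile-time UNaT slot for r16 possible: st8.spill records a register's NaT bit in ar.unat at bit (A >> 3) & 0x3f of the spill address A. A sketch of that arithmetic, assuming the stack top that the frame ends at is 512-byte aligned:

/* Sketch: with the frame ending at an aligned stack top, the spill
 * address of r16 modulo 512 depends only on struct layout, so its
 * ar.unat bit index is a compile-time constant. */
static inline int unat_slot_of_r16(void)
{
	unsigned long a = offsetof(struct pt_regs, r16)
	                  - sizeof(struct pt_regs);	/* address mod 512 */
	return (a >> 3) & 0x3f;
}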
typedef union {